[SVM] Remove ASID logic. Errata prevent this feature being used
author kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Mon, 22 Jan 2007 14:13:26 +0000 (14:13 +0000)
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Mon, 22 Jan 2007 14:13:26 +0000 (14:13 +0000)
reliably in current SVM processor implementations.
Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/vmcb.c
xen/arch/x86/hvm/svm/x86_32/exits.S
xen/arch/x86/hvm/svm/x86_64/exits.S
xen/include/asm-x86/hvm/svm/svm.h
xen/include/asm-x86/hvm/svm/vmcb.h

index fcb08ee5f6068d122f5047355bca8e254c03c2b1..91f9e6afaa40ec7169a91b6f95ca4ec7ab3b4e8e 100644 (file)
@@ -74,108 +74,6 @@ static void *root_vmcb[NR_CPUS] __read_mostly;
 /* physical address of above for host VMSAVE/VMLOAD */
 u64 root_vmcb_pa[NR_CPUS] __read_mostly;
 
-
-/* ASID API */
-enum {
-    ASID_AVAILABLE = 0,
-    ASID_INUSE,
-    ASID_RETIRED
-};
-#define   INITIAL_ASID      0
-#define   ASID_MAX          64
-struct asid_pool {
-    spinlock_t asid_lock;
-    u32 asid[ASID_MAX];
-};
-
-static DEFINE_PER_CPU(struct asid_pool, asid_pool);
-
-
-/*
- * Initializes the POOL of ASID used by the guests per core.
- */
-void asidpool_init(int core)
-{
-    int i;
-
-    spin_lock_init(&per_cpu(asid_pool,core).asid_lock);
-
-    /* Host ASID is always in use */
-    per_cpu(asid_pool,core).asid[INITIAL_ASID] = ASID_INUSE;
-    for ( i = 1; i < ASID_MAX; i++ )
-        per_cpu(asid_pool,core).asid[i] = ASID_AVAILABLE;
-}
-
-
-/* internal function to get the next available ASID */
-static int asidpool_fetch_next(struct vmcb_struct *vmcb, int core)
-{
-    int i;  
-    for ( i = 1; i < ASID_MAX; i++ )
-    {
-        if ( per_cpu(asid_pool,core).asid[i] == ASID_AVAILABLE )
-        {
-            vmcb->guest_asid = i;
-            per_cpu(asid_pool,core).asid[i] = ASID_INUSE;
-            return i;
-        }
-    }
-    return -1;
-}
-
-
-/*
- * This functions assigns on the passed VMCB, the next
- * available ASID number. If none are available, the
- * TLB flush flag is set, and all retireds ASID
- * are made available. 
- *
- *  Returns: 1 -- sucess;
- *           0 -- failure -- no more ASID numbers 
- *                           available.
- */
-int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
-                          int oldcore, int newcore )
-{
-    int i;
-    int res = 1;
-    static unsigned long cnt=0;
-
-    spin_lock(&per_cpu(asid_pool,oldcore).asid_lock);
-    if( retire_current && vmcb->guest_asid ) {
-        per_cpu(asid_pool,oldcore).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
-            ASID_RETIRED;
-    }
-    spin_unlock(&per_cpu(asid_pool,oldcore).asid_lock);
-    spin_lock(&per_cpu(asid_pool,newcore).asid_lock);
-    if( asidpool_fetch_next( vmcb, newcore ) < 0 ) {
-        if (svm_dbg_on)
-            printk( "SVM: tlb(%ld)\n", cnt++ );
-        /* FLUSH the TLB and all retired slots are made available */ 
-        vmcb->tlb_control = 1;
-        for( i = 1; i < ASID_MAX; i++ ) {
-            if( per_cpu(asid_pool,newcore).asid[i] == ASID_RETIRED ) {
-                per_cpu(asid_pool,newcore).asid[i] = ASID_AVAILABLE;
-            }
-        }
-        /* Get the First slot available */ 
-        res = asidpool_fetch_next( vmcb, newcore ) > 0;
-    }
-    spin_unlock(&per_cpu(asid_pool,newcore).asid_lock);
-    return res;
-}
-
-void asidpool_retire( struct vmcb_struct *vmcb, int core )
-{
-    spin_lock(&per_cpu(asid_pool,core).asid_lock);
-    if( vmcb->guest_asid ) {
-        per_cpu(asid_pool,core).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
-            ASID_RETIRED;
-    }
-    spin_unlock(&per_cpu(asid_pool,core).asid_lock);
-}
-
 static inline void svm_inject_exception(struct vcpu *v, int trap, 
                                         int ev, int error_code)
 {
@@ -851,7 +749,6 @@ int start_svm(void)
     rdmsr(MSR_EFER, eax, edx);
     eax |= EFER_SVME;
     wrmsr(MSR_EFER, eax, edx);
-    asidpool_init( cpu );    
     printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
 
     /* Initialize the HSA for this core */
@@ -920,28 +817,11 @@ void arch_svm_do_resume(struct vcpu *v)
 
 static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
 {
-    struct vcpu *v = current;
-    unsigned long eip;
-    int result;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    ASSERT(vmcb);
-
-//#if HVM_DEBUG
-    eip = vmcb->rip;
     HVM_DBG_LOG(DBG_LEVEL_VMMU, 
                 "svm_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
-                va, eip, (unsigned long)regs->error_code);
-//#endif
-
-    result = shadow_fault(va, regs); 
-
-    if( result ) {
-        /* Let's make sure that the Guest TLB is flushed */
-        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
-    }
-
-    return result;
+                va, (unsigned long)current->arch.hvm_svm.vmcb->rip,
+                (unsigned long)regs->error_code);
+    return shadow_fault(va, regs); 
 }
 
 
@@ -1578,8 +1458,6 @@ static int svm_set_cr0(unsigned long value)
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
                     (unsigned long) (mfn << PAGE_SHIFT));
-
-        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
     }
 
     if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
@@ -1600,7 +1478,6 @@ static int svm_set_cr0(unsigned long value)
             return 0;
         }
         shadow_update_paging_modes(v);
-        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
     }
     else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
     {
@@ -1611,7 +1488,6 @@ static int svm_set_cr0(unsigned long value)
         }
         /* we should take care of this kind of situation */
         shadow_update_paging_modes(v);
-        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
     }
 
     return 1;
@@ -1702,7 +1578,6 @@ static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
             v->arch.hvm_svm.cpu_cr3 = value;
             break;
         }
-        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
 
         /* We make a new one if the shadow does not exist. */
         if (value == v->arch.hvm_svm.cpu_cr3) 
@@ -1795,10 +1670,7 @@ static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
          * all TLB entries except global entries.
          */
         if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
-        {
-            set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
             shadow_update_paging_modes(v);
-        }
         break;
 
     case 8:
@@ -2140,8 +2012,6 @@ void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
         __update_guest_eip (vmcb, inst_len);
     }
 
-    /* Overkill, we may not this */
-    set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
     shadow_invlpg(v, g_vaddr);
 }
 
@@ -2892,31 +2762,6 @@ asmlinkage void svm_load_cr2(void)
     local_irq_disable();
     asm volatile("mov %0,%%cr2": :"r" (v->arch.hvm_svm.cpu_cr2));
 }
-
-asmlinkage void svm_asid(void)
-{
-    struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    /*
-     * if need to assign new asid, or if switching cores,
-     * retire asid for the old core, and assign a new asid to the current core.
-     */
-    if ( test_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags ) ||
-         ( v->arch.hvm_svm.asid_core != v->arch.hvm_svm.launch_core )) {
-        /* recycle asid */
-        if ( !asidpool_assign_next(vmcb, 1,
-                                   v->arch.hvm_svm.asid_core,
-                                   v->arch.hvm_svm.launch_core) )
-        {
-            /* If we get here, we have a major problem */
-            domain_crash_synchronous();
-        }
-
-        v->arch.hvm_svm.asid_core = v->arch.hvm_svm.launch_core;
-        clear_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags );
-    }
-}
   
 /*
  * Local variables:
index b32db1b62c01b34a7cec3a0346f03f4364be9a36..6683bc9d18daf3861bcd8a4272b925e1385177a9 100644 (file)
@@ -38,8 +38,6 @@
 #include <xen/keyhandler.h>
 
 extern int svm_dbg_on;
-extern int asidpool_assign_next(
-    struct vmcb_struct *vmcb, int retire_current, int oldcore, int newcore);
 
 #define GUEST_SEGMENT_LIMIT 0xffffffff
 
@@ -92,8 +90,9 @@ static int construct_vmcb(struct vcpu *v)
     struct vmcb_struct *vmcb = arch_svm->vmcb;
     svm_segment_attributes_t attrib;
 
-    /* Always flush the TLB on VMRUN. */
+    /* Always flush the TLB on VMRUN. All guests share a single ASID (1). */
     vmcb->tlb_control = 1;
+    vmcb->guest_asid  = 1;
 
     /* SVM intercepts. */
     vmcb->general1_intercepts = 
@@ -240,10 +239,7 @@ void svm_destroy_vmcb(struct vcpu *v)
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
 
     if ( arch_svm->vmcb != NULL )
-    {
-        asidpool_retire(arch_svm->vmcb, arch_svm->asid_core);
         free_vmcb(arch_svm->vmcb);
-    }
 
     if ( arch_svm->iopm != NULL )
     {
@@ -264,16 +260,10 @@ void svm_destroy_vmcb(struct vcpu *v)
 
 void svm_do_launch(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    int core = smp_processor_id();
-
     hvm_stts(v);
 
     /* current core is the one we intend to perform the VMRUN on */
-    v->arch.hvm_svm.launch_core = v->arch.hvm_svm.asid_core = core;
-    clear_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
-    if ( !asidpool_assign_next(vmcb, 0, core, core) )
-        BUG();
+    v->arch.hvm_svm.launch_core = smp_processor_id();
 
     v->arch.schedule_tail = arch_svm_do_resume;
 }
index 2cd913e16b1f481e6de9dae120336edb03d19cc1..c465e3b7fa6318befad23c90cfb8d206d262c9a9 100644 (file)
@@ -150,7 +150,6 @@ svm_test_all_events:
         jnz  svm_process_softirqs
 svm_restore_all_guest:
         call svm_intr_assist
-        call svm_asid
         call svm_load_cr2
         /* 
          * Check if we are going back to AMD-V based VM
index 0c9aa641a3d8a93a0897db38e149e3e6b426f698..042cd6dc8e49be84dc729c6b7560e5f73d072aa1 100644 (file)
@@ -163,7 +163,6 @@ svm_test_all_events:
         jnz   svm_process_softirqs
 svm_restore_all_guest:
         call svm_intr_assist
-        call svm_asid
         call svm_load_cr2
         /*
          * Check if we are going back to AMD-V based VM
index a97daae850518dda9d1e362d82335ceb95bb26f9..da906d55e40135cda4ae5fdbef26d9d3a069a7f0 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/hvm/svm/vmcb.h>
 #include <asm/i387.h>
 
-extern void asidpool_retire(struct vmcb_struct *vmcb, int core);
 extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
 extern void svm_do_launch(struct vcpu *v);
 extern void arch_svm_do_resume(struct vcpu *v);
index 3340ec3a9ee4b8c2fddc765982552b668835866b..65ac95e4eba13982a79a704a29baad6e2d9c0913 100644 (file)
@@ -457,7 +457,6 @@ struct arch_svm_struct {
     u64                 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
     int                 saved_irq_vector;
     u32                 launch_core;
-    u32                 asid_core;
     
     unsigned long       flags;      /* VMCB flags */
     unsigned long       cpu_shadow_cr0; /* Guest value for CR0 */
@@ -477,17 +476,6 @@ void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
 
-#define VMCB_USE_HOST_ENV       1
-#define VMCB_USE_SEPARATE_ENV   0
-
-enum {
-    ARCH_SVM_VMCB_LOADED = 0,
-    ARCH_SVM_VMCB_ASSIGN_ASID
-};
-
-#define VMCB_EFLAGS_RESERVED_0          0xffc08028 /* bitmap for 0 */
-#define VMCB_EFLAGS_RESERVED_1          0x00000002 /* bitmap for 1 */
-
 /* These bits in the CR4 are owned by the host */
 #if CONFIG_PAGING_LEVELS >= 3
 #define SVM_CR4_HOST_MASK (X86_CR4_PAE)